In this report, we reproduce the analyses of fMRI Study 1.

prep data

First, we load the relevant packages, define functions and plotting aesthetics, and load and tidy the data.

load packages

# load (installing first if necessary) all packages used in this report
library(pacman)
pacman::p_load(tidyverse, purrr, fs, knitr, lmerTest, ggeffects, kableExtra, boot, devtools, install = TRUE)
# emoji package (used for the ✅/❌ hypothesis markers); GitHub-only, not on CRAN
devtools::install_github("hadley/emo")

define functions

# raincloud-plot helper (geom_flat_violin) sourced from a public gist
source("https://gist.githubusercontent.com/benmarwick/2a1bb0133ff568cbe28d/raw/fb53bd97121f7f9ce947837ef1a4c65a73bffb3f/geom_flat_violin.R")

# MLM results table function
# MLM results table function
#
# Tidies a fitted (g)lmer model into a fixed-effects summary table:
# human-readable term labels, "b [95% CI]" strings, rounded dfs and
# t statistics, and APA-style p values. If print = TRUE (default) the
# table is returned as a styled kable; otherwise the data frame is
# returned for downstream use.
table_model = function(model_data, print = TRUE) {
  table = model_data %>%
    broom.mixed::tidy(conf.int = TRUE) %>%
    filter(effect == "fixed") %>%
    rename("SE" = std.error,
           "t" = statistic,
           "p" = p.value) %>%
    select(-group, -effect) %>%
    # round every numeric column except p, which gets its own APA
    # formatting below; across() replaces the superseded mutate_at()/vars()
    mutate(across(c(-contains("term"), -contains("p")), ~ round(.x, 2))) %>%
    # relabel model terms for the report; the condother/condself rules that
    # used to follow gsub("cond", ...) were dead code (the "cond" prefix is
    # already stripped by the first rule), so they have been removed
    mutate(term = gsub("cond", "", term),
           term = gsub("\\(Intercept\\)", "intercept", term),
           term = gsub("siteUSA", "sample (USA)", term),
           term = gsub("self_referential", "self-referential", term),
           term = gsub("self_relevance_z", "self-relevance", term),
           term = gsub("social_relevance_z", "social relevance", term),
           term = gsub(":", " x ", term),
           # p < .001 / p = 1 printed verbatim; otherwise strip the leading zero
           p = ifelse(p < .001, "< .001",
               ifelse(p > .999, "1.000", gsub("0.(.*)", ".\\1", sprintf("%.3f", p)))),
           `b [95% CI]` = sprintf("%.2f [%.2f, %.2f]", estimate, conf.low, conf.high)) %>%
    select(term, `b [95% CI]`, df, t, p)
  
  if (isTRUE(print)) {
    table %>%
      kable() %>%
      kableExtra::kable_styling()
  } else {
    table
  }
}

# Simple-slopes / simple-effects helper.
#
# For a continuous predictor (continuous = TRUE), reports the trend of
# `var` at each level of `moderator` via emmeans::emtrends(). Otherwise,
# reports revpairwise contrasts of the categorical `var` against control
# within each moderator level. Either way the result is printed as a
# styled kable with "b [95% CI]" strings.
simple_slopes = function(model, var, moderator, continuous = TRUE) {
  
  if (isTRUE(continuous)) {
    # slope of the continuous predictor at each moderator level
    trends = emmeans::emtrends(model, as.formula(paste("~", moderator)), var = var)
    out = trends %>%
      data.frame() %>%
      rename("trend" = 2) %>%
      mutate(`b [95% CI]` = sprintf("%.2f [%.2f, %.2f]", trend, asymp.LCL, asymp.UCL)) %>%
      select(!!moderator, `b [95% CI]`)
    
  } else {
    # condition contrasts (vs. control) within each moderator level
    cell_means = emmeans::emmeans(model, as.formula(paste("~", var, "|", moderator)))
    cell_contrasts = emmeans::contrast(cell_means, "revpairwise", by = moderator, adjust = "none")
    out = cell_contrasts %>%
      confint() %>%
      data.frame() %>%
      filter(grepl("control", contrast)) %>%
      mutate(`b [95% CI]` = sprintf("%.2f [%.2f, %.2f]", estimate, asymp.LCL, asymp.UCL)) %>%
      select(contrast, !!moderator, `b [95% CI]`) %>%
      arrange(contrast)
  }
  
  # shared presentation step for both branches
  out %>%
    kable() %>%
    kableExtra::kable_styling()
}

define aesthetics

# named color palettes keyed by factor level, so colors stay stable across plots
palette_condition = c("self" = "#ee9b00",
                      "control" = "#bb3e03",
                      "other" = "#005f73")
palette_roi = c("self-referential" = "#ee9b00",
               "mentalizing" = "#005f73")
palette_dv = c("self-relevance" = "#ee9b00",
               "social relevance" = "#005f73",
               "sharing" = "#56282D")
palette_sample = c("Netherlands" = "#027EA1",
                 "USA" = "#334456")

# shared ggplot theme applied to every figure in this report
# (NOTE: "Futura Medium" must be installed locally for text to render as intended)
plot_aes = theme_minimal() +
  theme(legend.position = "top",
        legend.text = element_text(size = 12),
        text = element_text(size = 16, family = "Futura Medium"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.text = element_text(color = "black"),
        axis.line = element_line(colour = "black"),
        axis.ticks.y = element_blank())

load and tidy data

merged_all = read.csv("../data/study1_data.csv")

# trial-level ratings, z-scored across all trials and participants
# (one row per pID x trial; unique() removes the per-ROI duplication)
ratings_z = merged_all %>%
  select(pID, event, trial, self_relevance, social_relevance) %>%
  unique() %>%
  mutate(self_relevance_z = scale(self_relevance, center = TRUE, scale = TRUE),
         social_relevance_z = scale(social_relevance, center = TRUE, scale = TRUE))

# drop outlier trials, keep the two ROIs of interest, and scale parameter
# estimates by each participant's within-ROI SD; join back the z-scored ratings
# (left_join with no `by` uses all shared columns and prints a message)
merged = merged_all %>%
  filter(outlier == "no" | is.na(outlier)) %>%
  filter(atlas %in% c("self-referential", "mentalizing")) %>%
  group_by(pID, atlas) %>%
  mutate(parameter_estimate_std = parameter_estimate / sd(parameter_estimate, na.rm = TRUE)) %>%
  left_join(., ratings_z)

# one row per trial with one column per ROI
# (pivot_wider replaces the superseded spread)
merged_wide = merged %>%
  select(pID, site, trial, cond, value, self_relevance, self_relevance_z, social_relevance, social_relevance_z, atlas, parameter_estimate_std) %>%
  pivot_wider(names_from = atlas, values_from = parameter_estimate_std) %>%
  rename("self_referential" = `self-referential`)

quality check

Check the data quality and identify missing data

check number of participants

# number of participants per sample site
# (distinct() + count() replaces select/group_by/unique/summarize;
# ungroup() first so merged_wide's residual grouping cannot affect the count)
merged_wide %>%
  ungroup() %>%
  distinct(pID, site) %>%
  count(site, name = "n") %>%
  arrange(n) %>%
  rename("sample" = site) %>%
  kable(digits = 2) %>%
  kableExtra::kable_styling()
sample n
Netherlands 40
USA 45

check number of trials

Print participant IDs who have < 72 trials

# participants with fewer than the full 72 trials, fewest first
merged_wide %>%
  ungroup() %>%
  count(pID, name = "n") %>%
  filter(n < 72) %>%
  arrange(n) %>%
  kable(digits = 2) %>%
  kableExtra::kable_styling()
pID n
BPP65 59
BPA34 62
BPP52 62
BPA23 63
BPP21 63
BPP05 66
BPA45 67
BPP61 67
BPA29 68
BPA47 68
BPP64 68
BPA04 69
BPP56 69
BPA12 70
BPP20 70
BPP58 70
BPA02 71
BPA05 71
BPA08 71
BPA16 71
BPA26 71
BPA27 71
BPA31 71
BPA32 71
BPA33 71
BPA35 71
BPA37 71
BPA38 71
BPA46 71
BPP22 71
BPP67 71

check missing response data

Print participant IDs who have > 0 missing responses

# participants with missing sharing responses, most missing first
merged_wide %>%
  filter(is.na(value)) %>%
  ungroup() %>%
  count(pID, name = "n") %>%
  arrange(desc(n)) %>%
  kable(digits = 2) %>%
  kableExtra::kable_styling()
pID n
BPA10 12
BPA35 12
BPP21 10
BPA45 9
BPA12 8
BPA33 4
BPP60 3
BPP20 2
BPP26 2
BPP56 2
BPP66 2
BPA02 1
BPA03 1
BPA04 1
BPA08 1
BPA27 1
BPA32 1
BPP12 1
BPP15 1
BPP29 1
BPP33 1
BPP47 1
BPP49 1
BPP65 1

check global signal

These plots are before outliers were excluded

all trials

# raincloud plot of trial-level global signal by condition
# (pre-exclusion data, all trials)
merged_all %>%
  ggplot(aes("", global_mean, fill = cond)) +
  geom_flat_violin(position = position_nudge(x = .15, y = 0), color = FALSE, alpha = .5) +
  coord_flip() +
  geom_point(aes(color = cond), position = position_jitter(width = .05), size = .1, alpha = .2) + 
  geom_boxplot(width = .1, outlier.shape = NA, color = "black", position = position_dodge(.15)) +
  scale_fill_manual(values = palette_condition) +
  scale_color_manual(values = palette_condition) +
  scale_x_discrete(expand = c(0, .1)) +
  labs(x = "") + 
  plot_aes

individual averages

# raincloud plot of participant-by-condition mean global signal
# (pre-exclusion data)
merged_all %>%
  group_by(pID, cond) %>%
  summarize(global_mean = mean(global_mean, na.rm = TRUE)) %>%
  ggplot(aes("", global_mean, fill = cond)) +
  geom_flat_violin(position = position_nudge(x = .15, y = 0), color = FALSE, alpha = .5) +
  coord_flip() +
  geom_point(aes(color = cond), position = position_jitter(width = .05), size = 1, alpha = .5) + 
  geom_boxplot(width = .1, outlier.shape = NA, color = "black", position = position_dodge(.15)) +
  scale_fill_manual(values = palette_condition) +
  scale_color_manual(values = palette_condition) +
  scale_x_discrete(expand = c(0, .1)) +
  labs(x = "") + 
  plot_aes

number of outliers

# number and percent of trials flagged as global-signal outliers
# (count() + pivot_wider replaces group_by/summarize + superseded spread)
merged_all %>%
  count(outlier, name = "n") %>%
  pivot_wider(names_from = outlier, values_from = n) %>%
  mutate(percent = round((yes / (yes + no)) * 100, 1))



descriptives

Summarize means, SDs, and correlations between the ROIs

ratings

# M and SD of each trial-level rating, pooled across participants
# (pivot_longer replaces the superseded gather; case_when replaces nested ifelse)
merged_wide %>%
  pivot_longer(c(value, self_relevance, social_relevance),
               names_to = "variable", values_to = "value") %>%
  group_by(variable) %>%
  summarize(M = mean(value, na.rm = TRUE),
            SD = sd(value, na.rm = TRUE)) %>%
  mutate(variable = case_when(variable == "self_relevance" ~ "self-relevance",
                              variable == "social_relevance" ~ "social relevance",
                              TRUE ~ "sharing intention")) %>%
  kable(digits = 2) %>%
  kableExtra::kable_styling()
variable M SD
self-relevance 2.57 1.02
social relevance 2.67 0.96
sharing intention 2.62 1.02

ROI activity

# M and SD of standardized activity in each ROI
# (pivot_longer replaces the superseded gather)
merged_wide %>%
  pivot_longer(c(mentalizing, self_referential),
               names_to = "variable", values_to = "value") %>%
  group_by(variable) %>%
  summarize(M = mean(value, na.rm = TRUE),
            SD = sd(value, na.rm = TRUE)) %>%
  kable(digits = 2) %>%
  kableExtra::kable_styling()
variable M SD
mentalizing 0.37 1.10
self_referential 0.14 1.11

ROI correlations

Correlation between self-referential and mentalizing ROIs. Given the high correlations, we also report sensitivity analyses with alternative, less highly correlated ROIs. Note, we do not include both ROIs in the same model, so multicollinearity is not an issue.

# repeated-measures correlation between the two ROIs' trial-level
# parameter estimates, accounting for within-participant dependence
merged %>%
  select(pID, trial, cond, atlas, parameter_estimate) %>%
  spread(atlas, parameter_estimate) %>%
  rmcorr::rmcorr(as.factor(pID), mentalizing, `self-referential`, data = .)
## 
## Repeated measures correlation
## 
## r
## 0.9382227
## 
## degrees of freedom
## 5928
## 
## p-value
## 0
## 
## 95% confidence interval
## 0.9351 0.9411998

replication analyses

H1

Is greater activity in the ROIs associated with higher self and social relevance ratings?

self-referential ROI

✅ H1a: Greater activity in the self-referential ROI will be associated with higher self-relevance ratings

# H1a: self-relevance ratings ~ self-referential ROI activity,
# random intercepts and slopes per participant; bobyqa optimizer for convergence
mod_h1a =  lmer(self_relevance ~ self_referential + (1 + self_referential | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h1a)
term b [95% CI] df t p
intercept 2.56 [2.48, 2.64] 84.10 66.03 < .001
self-referential 0.05 [0.02, 0.07] 82.76 3.68 < .001

summary

summary(mod_h1a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ self_referential + (1 + self_referential | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16749.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4362 -0.7057  0.1481  0.6856  2.3548 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.114216 0.33796       
##           self_referential 0.001684 0.04104  -0.76
##  Residual                  0.916509 0.95734       
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                  Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)       2.55948    0.03876 84.09759  66.029 < 0.0000000000000002 ***
## self_referential  0.04796    0.01304 82.76293   3.679             0.000416 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## self_rfrntl -0.294

mentalizing ROI

✅ H1b: Greater activity in the mentalizing ROI will be associated with higher social relevance ratings

# H1b: social relevance ratings ~ mentalizing ROI activity,
# random intercepts and slopes per participant
mod_h1b = lmer(social_relevance ~ mentalizing + (1 + mentalizing | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h1b)
term b [95% CI] df t p
intercept 2.66 [2.57, 2.74] 84.49 63.77 < .001
mentalizing 0.05 [0.02, 0.07] 83.18 3.80 < .001

summary

summary(mod_h1b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ mentalizing + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15831.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.8332 -0.7255  0.1643  0.6507  2.6739 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.134404 0.36661       
##           mentalizing 0.002549 0.05049  -0.11
##  Residual             0.781733 0.88416       
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##             Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)  2.65619    0.04165 84.49481   63.77 < 0.0000000000000002 ***
## mentalizing  0.04800    0.01263 83.18393    3.80             0.000274 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## mentalizing -0.139

combined plot

# fixed-effect predictions of each rating across the ROI activity range
predicted_h1 = ggeffects::ggpredict(mod_h1a, c("self_referential [-4.5:5]")) %>%
  data.frame() %>%
  mutate(roi = "self-referential",
         variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h1b, c("mentalizing [-4.5:5]")) %>%
              data.frame() %>%
              mutate(roi = "mentalizing",
                     variable = "social relevance"))

# participant-specific (random-effect) predictions for the spaghetti lines
predicted_sub_h1 = ggeffects::ggpredict(mod_h1a, terms = c("self_referential [-4.5:5]", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(roi = "self-referential",
         variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h1b, c("mentalizing [-4.5:5]", "pID"), type = "random") %>%
              data.frame() %>%
              mutate(roi = "mentalizing",
                     variable = "social relevance"))

# fixed-effect line + CI ribbon over per-participant slopes, faceted by rating
# (guide = "none" replaces the deprecated guide = FALSE, matching the
# other figures in this report)
predicted_h1 %>%
  ggplot(aes(x, predicted)) +
  stat_smooth(data = predicted_sub_h1, aes(group = group, color = roi), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = roi), alpha = .5, color = NA) +
  geom_line(aes(color = roi), size = 2) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_roi, guide = "none") +
  scale_fill_manual(name = "", values = palette_roi, guide = "none") +
  labs(x = "\nROI activity (SD)", y = "predicted rating\n") +
  plot_aes

H2

Do the manipulations increase relevance?

self-relevance

❌ H2a: Self-focused intervention (compared to control) will increase self-relevance

# H2a: self-relevance ratings ~ condition (control = reference level),
# random intercepts per participant
mod_h2a = lmer(self_relevance ~ cond + (1 | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h2a)
term b [95% CI] df t p
intercept 2.55 [2.47, 2.64] 122.78 60.72 < .001
other 0.01 [-0.05, 0.07] 5927.28 0.21 .837
self 0.03 [-0.03, 0.09] 5927.34 1.09 .276

summary

summary(mod_h2a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ cond + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16772.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4268 -0.7132  0.1659  0.6723  2.3425 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1114   0.3337  
##  Residual             0.9205   0.9594  
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                Estimate  Std. Error          df t value            Pr(>|t|)    
## (Intercept)    2.554630    0.042071  122.778484  60.722 <0.0000000000000002 ***
## condother      0.006223    0.030305 5927.281838   0.205               0.837    
## condself       0.033030    0.030313 5927.343524   1.090               0.276    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.360       
## condself  -0.360  0.500

social relevance

❌ H2b: Other-focused intervention (compared to control) will increase social relevance

# H2b: social relevance ratings ~ condition (control = reference level),
# random intercepts per participant
mod_h2b = lmer(social_relevance ~ cond + (1 | pID),
               data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h2b)
term b [95% CI] df t p
intercept 2.64 [2.56, 2.73] 112.07 59.89 < .001
other 0.05 [-0.01, 0.10] 5927.24 1.63 .104
self 0.05 [-0.01, 0.10] 5927.29 1.62 .106

summary

summary(mod_h2b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ cond + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15851.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7797 -0.7200  0.1774  0.6544  2.6801 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1324   0.3638  
##  Residual             0.7864   0.8868  
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##               Estimate Std. Error         df t value            Pr(>|t|)    
## (Intercept)    2.64451    0.04416  112.07347  59.888 <0.0000000000000002 ***
## condother      0.04555    0.02801 5927.24106   1.626               0.104    
## condself       0.04530    0.02802 5927.28673   1.617               0.106    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.317       
## condself  -0.317  0.500

combined plot

# fixed-effect predicted ratings per condition, for both H2 models
predicted_h2 = ggeffects::ggpredict(mod_h2a, c("cond")) %>%
  data.frame() %>%
  mutate(model = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h2b, c("cond")) %>%
              data.frame() %>%
              mutate(model = "social relevance")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# participant-specific (random-effect) predictions for the grey lines
predicted_sub_h2 = ggeffects::ggpredict(mod_h2a, terms = c("cond", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(model = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h2b, c("cond", "pID"), type = "random") %>%
              data.frame() %>%
              mutate(model = "social relevance")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# condition means with CIs (points) over per-participant trajectories (grey lines)
predicted_h2 %>%
  ggplot(aes(x = x, y = predicted)) +
  stat_summary(data = predicted_sub_h2, aes(group = group), fun = "mean", geom = "line",
               size = .08, color = "grey50") +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1) +
  geom_pointrange(aes(color = x, ymin = conf.low, ymax = conf.high), size = .5) +
  facet_grid(~model) +
  scale_color_manual(name = "", values = palette_condition, guide = "none") +
  scale_alpha_manual(name = "", values = c(1, .5)) +
  labs(x = "", y = "predicted rating\n") +
  plot_aes +
  theme(legend.position = c(.85, .15))

H3

Is greater self and social relevance associated with higher sharing intentions?

✅ H3a: Greater self-relevance ratings will be associated with higher sharing intentions

✅ H3b: Greater social relevance ratings will be associated with higher sharing intentions

# H3: sharing intentions ~ z-scored self- and social relevance,
# with random intercepts and both slopes per participant
mod_h3 = lmer(value ~ self_relevance_z + social_relevance_z + (1 + self_relevance_z + social_relevance_z | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

plot

# fixed-effect predicted sharing intentions across each relevance predictor
# (the other predictor is held at its reference value by ggpredict)
predicted_h3 = ggeffects::ggpredict(mod_h3, c("self_relevance_z")) %>%
  data.frame() %>%
  mutate(variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h3, c("social_relevance_z")) %>%
              data.frame() %>%
              mutate(variable = "social relevance"))

# participant-specific (random-effect) predictions for the spaghetti lines
predicted_sub_h3 = ggeffects::ggpredict(mod_h3, terms = c("self_relevance_z", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h3, c("social_relevance_z", "pID"), type = "random") %>%
              data.frame() %>%
              mutate(variable = "social relevance"))

# fixed-effect line + CI ribbon over per-participant slopes, faceted by predictor
predicted_h3 %>%
  ggplot(aes(x, predicted)) +
  stat_smooth(data = predicted_sub_h3, aes(group = group, color = variable),
              geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high, fill = variable), alpha = .2, color = NA) +
  geom_line(aes(color = variable), size = 1.5) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_dv[1:2]) +
  scale_fill_manual(name = "", values = palette_dv[1:2]) +
  labs(x = "\nrelevance rating", y = "predicted sharing intention rating\n") +
  plot_aes +
    theme(legend.position = "none")

model table

table_model(mod_h3)
term b [95% CI] df t p
intercept 2.63 [2.58, 2.69] 83.46 95.86 < .001
self-relevance 0.31 [0.27, 0.35] 85.60 15.59 < .001
social relevance 0.24 [0.19, 0.29] 82.45 9.75 < .001

summary

summary(mod_h3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: 
## value ~ self_relevance_z + social_relevance_z + (1 + self_relevance_z +  
##     social_relevance_z | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 14904.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3650 -0.7051  0.0600  0.6939  3.0503 
## 
## Random effects:
##  Groups   Name               Variance Std.Dev. Corr       
##  pID      (Intercept)        0.05280  0.2298              
##           self_relevance_z   0.01218  0.1103   -0.39      
##           social_relevance_z 0.02941  0.1715    0.21 -0.55
##  Residual                    0.68662  0.8286              
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##                    Estimate Std. Error       df t value             Pr(>|t|)
## (Intercept)         2.63303    0.02747 83.46378  95.857 < 0.0000000000000002
## self_relevance_z    0.30924    0.01983 85.60222  15.592 < 0.0000000000000002
## social_relevance_z  0.24092    0.02470 82.44618   9.755  0.00000000000000216
##                       
## (Intercept)        ***
## self_relevance_z   ***
## social_relevance_z ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_r_
## slf_rlvnc_z -0.220       
## scl_rlvnc_z  0.144 -0.596

preregistered analyses

Link to the preregistration

Deviations:

  • removed the condition slope as a random effect from the H5 models because models including it did not converge


H4

Do the manipulations increase neural activity in brain regions associated with self-referential processing and mentalizing?

self-referential ROI

✅ H4a: Self-focused intervention (compared to control) will increase brain activity in ROIs related to self-referential processes.

# H4a: self-referential ROI activity ~ condition,
# random intercepts and condition slopes per participant
mod_h4a = lmer(self_referential ~ cond + (1 + cond | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h4a)
term b [95% CI] df t p
intercept 0.08 [-0.03, 0.20] 84.07 1.46 .147
other 0.09 [0.01, 0.16] 83.53 2.19 .032
self 0.09 [0.00, 0.17] 83.67 2.06 .043

summary

summary(mod_h4a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_referential ~ cond + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17285
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.7918 -0.6605  0.0028  0.6473  3.6030 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.23308  0.4828              
##           condother   0.04602  0.2145   -0.18      
##           condself    0.07364  0.2714   -0.07  0.59
##  Residual             0.97964  0.9898              
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)  
## (Intercept)  0.08318    0.05685 84.06811   1.463   0.1471  
## condother    0.08524    0.03898 83.53399   2.187   0.0316 *
## condself     0.08831    0.04295 83.66777   2.056   0.0429 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.321       
## condself  -0.246  0.534

mentalizing ROI

❌ H4b: Other-focused intervention (compared to control) will increase brain activity in ROIs related to mentalizing processes.

The other condition is associated with increased activation in the mentalizing ROI, but the relationship is not statistically significant.

# H4b: mentalizing ROI activity ~ condition,
# random intercepts and condition slopes per participant
mod_h4b = lmer(mentalizing ~ cond + (1 + cond | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h4b)
term b [95% CI] df t p
intercept 0.33 [0.22, 0.44] 84.10 5.93 < .001
other 0.06 [-0.02, 0.14] 83.34 1.58 .117
self 0.07 [-0.01, 0.16] 83.73 1.72 .089

summary

summary(mod_h4b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: mentalizing ~ cond + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17288.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6193 -0.6570  0.0214  0.6732  3.3254 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.21885  0.4678              
##           condother   0.03877  0.1969   -0.19      
##           condself    0.06933  0.2633   -0.05  0.61
##  Residual             0.98228  0.9911              
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##             Estimate Std. Error       df t value     Pr(>|t|)    
## (Intercept)  0.32819    0.05537 84.09838   5.928 0.0000000656 ***
## condother    0.05999    0.03790 83.34488   1.583       0.1173    
## condself     0.07296    0.04239 83.72979   1.721       0.0889 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.331       
## condself  -0.240  0.537

combined plot

# fixed-effect predicted ROI activity per condition, for both H4 models
predicted_h4 = ggeffects::ggpredict(mod_h4a, c("cond")) %>%
  data.frame() %>%
  mutate(atlas = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h4b, c("cond")) %>%
              data.frame() %>%
              mutate(atlas = "mentalizing")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# participant-specific (random-effect) predictions for the grey lines
predicted_sub_h4 = ggeffects::ggpredict(mod_h4a, terms = c("cond", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(atlas = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h4b, c("cond", "pID"), type = "random") %>%
              data.frame() %>%
              mutate(atlas = "mentalizing")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# condition means with CIs (points) over per-participant trajectories, by ROI
predicted_h4 %>%
  ggplot(aes(x = x, y = predicted)) +
  stat_summary(data = predicted_sub_h4, aes(group = group), fun = "mean", geom = "line",
               size = .1, color = "grey50") +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1) +
  geom_pointrange(aes(color = x, ymin = conf.low, ymax = conf.high), size = .75) +
  facet_grid(~atlas) +
  scale_color_manual(name = "", values = palette_condition, guide = "none") +
  scale_alpha_manual(name = "", values = c(1, .5)) +
  labs(x = "", y = "predicted ROI activity (SD)\n") +
  plot_aes

H5

Do the manipulations increase sharing intentions?

❌ H5a: Self-focused intervention (compared to control) will increase sharing intentions

❌ H5b: Other-focused intervention (compared to control) will increase sharing intentions

# H5: sharing intentions ~ condition, random intercepts per participant
# (condition slope removed — models with it did not converge; see deviations)
mod_h5 = lmer(value ~ cond + (1 | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

plot

# fixed-effect predicted sharing intentions per condition
predicted_h5 = ggeffects::ggpredict(mod_h5, c("cond")) %>%
  data.frame() %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# participant-specific (random-effect) predictions for the grey lines
predicted_sub_h5 = ggeffects::ggpredict(mod_h5, terms = c("cond", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))
  
# condition means with CIs (points) over per-participant trajectories
predicted_h5 %>%
  ggplot(aes(x = x, y = predicted)) +
  stat_summary(data = predicted_sub_h5, aes(group = group), fun = "mean", geom = "line",
               size = .25, color = "grey50") +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1.5) +
  geom_pointrange(aes(color = x, ymin = conf.low, ymax = conf.high), size = 1.5) +
  scale_color_manual(name = "", values = palette_condition, guide = "none") +
  scale_alpha_manual(name = "", values = c(1, .5)) +
  labs(x = "", y = "predicted sharing intention\n") +
  plot_aes +
  theme(legend.position = c(.85, .15))

model table

table_model(mod_h5)
term b [95% CI] df t p
intercept 2.65 [2.56, 2.73] 126.10 63.68 < .001
other -0.03 [-0.09, 0.03] 5848.55 -1.06 .290
self -0.04 [-0.11, 0.02] 5848.60 -1.45 .147

summary

summary(mod_h5)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ cond + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16672.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5571 -0.7077  0.1147  0.7259  2.0377 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1063   0.3261  
##  Residual             0.9399   0.9695  
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##               Estimate Std. Error         df t value            Pr(>|t|)    
## (Intercept)    2.64574    0.04155  126.10176  63.680 <0.0000000000000002 ***
## condother     -0.03265    0.03082 5848.54880  -1.059               0.290    
## condself      -0.04468    0.03084 5848.59677  -1.449               0.147    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr) cndthr
## condother -0.371       
## condself  -0.371  0.500

H6

Is ROI activity positively related to sharing intentions?

self-referential ROI

✅ H6a: Stronger activity in the self-referential ROI will be related to higher sharing intentions.

# H6a: sharing intentions ~ self-referential ROI activity,
# random intercepts and slopes per participant
mod_h6a = lmer(value ~ self_referential + (1 + self_referential | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h6a)
term b [95% CI] df t p
intercept 2.61 [2.53, 2.68] 84.43 68.73 < .001
self-referential 0.08 [0.06, 0.11] 81.64 6.11 < .001

summary

summary(mod_h6a)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_referential + (1 + self_referential | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16625
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5959 -0.7247  0.1135  0.7333  2.2539 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.108148 0.32886       
##           self_referential 0.002504 0.05004  -0.22
##  Residual                  0.930334 0.96454       
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##                  Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)       2.60694    0.03793 84.42967  68.725 < 0.0000000000000002 ***
## self_referential  0.08314    0.01360 81.63664   6.113         0.0000000319 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## self_rfrntl -0.128

mentalizing ROI

✅ H6b: Stronger activation in the mentalizing ROI will be related to higher sharing intentions.

# H6b: sharing intentions ~ mentalizing ROI activity,
# random intercepts and slopes per participant
mod_h6b = lmer(value ~ mentalizing + (1 + mentalizing | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

table_model(mod_h6b)
term b [95% CI] df t p
intercept 2.59 [2.52, 2.67] 85.39 67.94 < .001
mentalizing 0.07 [0.05, 0.10] 81.87 5.48 < .001

summary

summary(mod_h6b)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ mentalizing + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16635.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5853 -0.7230  0.1157  0.7363  2.1999 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.108020 0.32866       
##           mentalizing 0.002117 0.04602  -0.11
##  Residual             0.932277 0.96555       
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##             Estimate Std. Error       df t value             Pr(>|t|)    
## (Intercept)  2.59181    0.03815 85.39359  67.938 < 0.0000000000000002 ***
## mentalizing  0.07367    0.01345 81.87396   5.477          0.000000465 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr)
## mentalizing -0.151

combined plot

# grid of ROI activity values (SD units) for predictions; ggpredict
# resolves `vals` from this environment via the "[vals]" term syntax
vals = seq(-4.5, 4.5, .1)

# group-level (fixed effect) predictions for each ROI, stacked long
predicted_h6 = ggeffects::ggpredict(mod_h6a, c("self_referential [vals]")) %>%
  data.frame() %>%
  mutate(roi = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h6b, c("mentalizing [vals]")) %>%
              data.frame() %>%
              mutate(roi = "mentalizing")) %>%
  mutate(roi = factor(roi, levels = c("self-referential", "mentalizing")))

# participant-level predictions (random effects) for the spaghetti lines
predicted_sub_h6 = ggeffects::ggpredict(mod_h6a, terms = c("self_referential [vals]", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(roi = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h6b, c("mentalizing [vals]", "pID"), type = "random") %>%
              data.frame() %>%
              mutate(roi = "mentalizing")) %>%
  mutate(roi = factor(roi, levels = c("self-referential", "mentalizing")))

# thin lines = per-participant slopes, thick line = group effect,
# ribbon = 95% CI; one panel per ROI
predicted_h6 %>%
  ggplot(aes(x = x, y = predicted, color = roi, fill = roi)) +
  stat_smooth(data = predicted_sub_h6, aes(group = group), geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .2, color = NA) +
  geom_line(size = 2) +
  facet_grid(~roi) +
  scale_color_manual(name = "", values = palette_roi) +
  scale_fill_manual(name = "", values = palette_roi) +
  labs(y = "predicted sharing intention\n", x = "\nROI activity (SD)") +
  plot_aes +
  theme(legend.position = "none")

H7

Is there an indirect effect of the condition on sharing intentions through activity in self-referential and mentalizing ROIs?

prep data

# source the multilevel mediation bootstrap functions
source("indirectMLM.R")

# self condition dataframe: self vs. control trials only, with cond
# dummy-coded (self = 1, control = 0).
# `cond != "other"` replaces the original `!cond == "other"`, which relied
# on `!` binding more loosely than `==`; the result is identical but the
# intent is now explicit.
data_med_self = merged_wide %>%
  filter(cond != "other") %>%
  mutate(cond = ifelse(cond == "self", 1, 0)) %>%
  select(pID, site, trial, cond, value, self_referential) %>%
  data.frame()

# other (social) condition dataframe: other vs. control trials only,
# with cond dummy-coded (other = 1, control = 0)
data_med_other = merged_wide %>%
  filter(cond != "self") %>%
  mutate(cond = ifelse(cond == "other", 1, 0)) %>%
  select(pID, site, trial, cond, value, mentalizing) %>%
  data.frame()

# name of the outcome variable passed to indirect.mlm
y_var = "value"

self condition

✅ H7a: The effect of Self-focused intervention on sharing intention is mediated by increased activity in the self-referential ROI.

model_name = "mediation_self"
data = data_med_self

# Use the cached bootstrap results if present; otherwise run the
# multilevel mediation bootstrap (R = 500 resamples) and cache it.
# `assign(model_name, ...)` and `get(model_name)` replace the original
# `assign(get("model_name"), ...)` / `eval(parse(text = model_name))`:
# get("model_name") merely returned the string already stored in
# model_name, and eval(parse()) is a fragile way to fetch a variable.
if (file.exists(sprintf("models/model_%s.RDS", model_name))) {
  assign(model_name, readRDS(sprintf("models/model_%s.RDS", model_name)))
} else {
  assign(model_name, boot(data = data, statistic = indirect.mlm, R = 500,
                          y = y_var, x = "cond", mediator = "self_referential", group.id = "pID",
                          between.m = FALSE, uncentered.x = FALSE))
  saveRDS(get(model_name), sprintf("models/model_%s.RDS", model_name))
}

# print indirect, direct, and total effects with bootstrapped CIs
indirect.mlm.summary(get(model_name))
## #### Population Covariance ####
## Covariance of Random Slopes a and b: 0.001 [-0.002, 0.011]
## 
## 
## #### Indirect Effects ####
## # Within-subject Effects
## Unbiased Estimate of Within-subjects Indirect Effect: 0.009 [0.003, 0.02]
## Biased Estimate of Within-subjects Indirect Effect: 0.007 [0.002, 0.013]
## Bias in Within-subjects Indirect Effect: 0.001 [0, 0.011]
## 
## 
## #### Total Effect ####
## Unbiased Estimate of Total Effect: -0.046 [-0.11, 0.01]
## Biased Total Effect of X on Y (c path): -0.044 [-0.109, 0.012]
## Bias in Total Effect: 0.002 [0, 0.006]
## 
## 
## #### Direct Effects ####
## Direct Effect of Predictor on Dependent Variable (c' path): -0.055 [-0.121, 0]
## Within-subjects Effect of Predictor on Mediator (a path for group-mean centered predictor): 0.088 [0.024, 0.146]
## Within-subjects Effect of Mediator on Dependent Variable (b path for group-mean centered mediator): 0.084 [0.05, 0.113]

other condition

❌ H7b: The effect of Other-focused intervention on sharing intention is mediated by increased activity in the mentalizing ROI.

model_name = "mediation_other"
data = data_med_other

# Use the cached bootstrap results if present; otherwise run the
# multilevel mediation bootstrap (R = 500 resamples) and cache it.
# `assign(model_name, ...)` and `get(model_name)` replace the original
# `assign(get("model_name"), ...)` / `eval(parse(text = model_name))`:
# get("model_name") merely returned the string already stored in
# model_name, and eval(parse()) is a fragile way to fetch a variable.
if (file.exists(sprintf("models/model_%s.RDS", model_name))) {
  assign(model_name, readRDS(sprintf("models/model_%s.RDS", model_name)))
} else {
  assign(model_name, boot(data = data, statistic = indirect.mlm, R = 500,
                          y = y_var, x = "cond", mediator = "mentalizing", group.id = "pID",
                          between.m = FALSE, uncentered.x = FALSE))
  saveRDS(get(model_name), sprintf("models/model_%s.RDS", model_name))
}

# print indirect, direct, and total effects with bootstrapped CIs
indirect.mlm.summary(get(model_name))
## #### Population Covariance ####
## Covariance of Random Slopes a and b: -0.001 [-0.005, 0.007]
## 
## 
## #### Indirect Effects ####
## # Within-subject Effects
## Unbiased Estimate of Within-subjects Indirect Effect: 0.004 [-0.001, 0.015]
## Biased Estimate of Within-subjects Indirect Effect: 0.005 [0, 0.011]
## Bias in Within-subjects Indirect Effect: 0.001 [0, 0.007]
## 
## 
## #### Total Effect ####
## Unbiased Estimate of Total Effect: -0.031 [-0.09, 0.029]
## Biased Total Effect of X on Y (c path): -0.032 [-0.09, 0.029]
## Bias in Total Effect: 0.001 [0, 0.005]
## 
## 
## #### Direct Effects ####
## Direct Effect of Predictor on Dependent Variable (c' path): -0.036 [-0.095, 0.025]
## Within-subjects Effect of Predictor on Mediator (a path for group-mean centered predictor): 0.06 [-0.002, 0.117]
## Within-subjects Effect of Mediator on Dependent Variable (b path for group-mean centered mediator): 0.083 [0.054, 0.119]

exploratory moderation by cultural context

These analyses explore whether the analyses reported in study 1 of the main manuscript are moderated by cultural context (the Netherlands or the USA).

H1

Are the relationships between ROI activity and self and social relevance ratings moderated by cultural context?

self-referential ROI

These data are not consistent with moderation by cultural context.

# H1a moderation: does cultural context (site) moderate the association
# between self-referential ROI activity and self-relevance ratings?
mod_h1am = lmer(
  self_relevance ~ self_referential * site + (1 + self_referential | pID),
  data = merged_wide,
  control = lmerControl(optimizer = "bobyqa")
)

model table

# build the tidy fixed-effects table, then render it as styled HTML
table_h1am = table_model(mod_h1am, print = FALSE)

kableExtra::kable_styling(kable(table_h1am))
term b [95% CI] df t p
intercept 2.61 [2.50, 2.72] 82.64 46.36 < .001
self-referential 0.04 [0.00, 0.08] 84.47 2.23 .028
sample (USA) -0.09 [-0.25, 0.06] 83.66 -1.21 .229
self-referential x sample (USA) 0.01 [-0.04, 0.06] 82.89 0.42 .673

simple slopes

simple_slopes(mod_h1am, "self_referential", "site")
site b [95% CI]
Netherlands 0.04 [0.01, 0.08]
USA 0.05 [0.02, 0.09]

summary

summary(mod_h1am)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ self_referential * site + (1 + self_referential |  
##     pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16756.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4404 -0.7064  0.1525  0.6834  2.3586 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.113508 0.3369        
##           self_referential 0.001841 0.0429   -0.72
##  Residual                  0.916497 0.9573        
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                          Estimate Std. Error       df t value
## (Intercept)               2.60867    0.05627 82.64206  46.359
## self_referential          0.04301    0.01924 84.46525   2.235
## siteUSA                  -0.09399    0.07759 83.66019  -1.211
## self_referential:siteUSA  0.01116    0.02634 82.88805   0.424
##                                     Pr(>|t|)    
## (Intercept)              <0.0000000000000002 ***
## self_referential                      0.0281 *  
## siteUSA                               0.2291    
## self_referential:siteUSA              0.6730    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rf sitUSA
## self_rfrntl -0.220              
## siteUSA     -0.725  0.160       
## slf_rfr:USA  0.161 -0.731 -0.280

mentalizing ROI

These data are not consistent with moderation by cultural context.

# H1b moderation: does cultural context (site) moderate the association
# between mentalizing ROI activity and social relevance ratings?
mod_h1bm = lmer(
  social_relevance ~ mentalizing * site + (1 + mentalizing | pID),
  data = merged_wide,
  control = lmerControl(optimizer = "bobyqa")
)

model table

# build the tidy fixed-effects table, then render it as styled HTML
table_h1bm = table_model(mod_h1bm, print = FALSE)

kableExtra::kable_styling(kable(table_h1bm))
term b [95% CI] df t p
intercept 2.73 [2.61, 2.84] 81.87 45.55 < .001
mentalizing 0.04 [0.01, 0.08] 83.25 2.42 .018
sample (USA) -0.13 [-0.30, 0.03] 83.42 -1.61 .111
mentalizing x sample (USA) 0.01 [-0.04, 0.06] 82.63 0.29 .772

simple slopes

simple_slopes(mod_h1bm, "mentalizing", "site")
site b [95% CI]
Netherlands 0.04 [0.01, 0.08]
USA 0.05 [0.02, 0.09]

summary

summary(mod_h1bm)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ mentalizing * site + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15837.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.8378 -0.7241  0.1647  0.6494  2.6771 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.131492 0.3626        
##           mentalizing 0.002735 0.0523   -0.10
##  Residual             0.781712 0.8841        
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                      Estimate Std. Error        df t value            Pr(>|t|)
## (Intercept)          2.725840   0.059842 81.872635  45.550 <0.0000000000000002
## mentalizing          0.044925   0.018588 83.247748   2.417              0.0178
## siteUSA             -0.133130   0.082656 83.423642  -1.611              0.1110
## mentalizing:siteUSA  0.007401   0.025505 82.631141   0.290              0.7724
##                        
## (Intercept)         ***
## mentalizing         *  
## siteUSA                
## mentalizing:siteUSA    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) mntlzn sitUSA
## mentalizing -0.088              
## siteUSA     -0.724  0.064       
## mntlzng:USA  0.064 -0.729 -0.129

combined plot

# prediction grid (SD units); resolved by ggpredict via "[vals]"
vals = seq(-4.5,4.5,.1)

# group-level predictions per ROI and site, stacked long
predicted_h1m = ggeffects::ggpredict(mod_h1am, c("self_referential [vals]", "site")) %>%
  data.frame() %>%
  mutate(roi = "self-referential",
         variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h1bm, c("mentalizing [vals]", "site")) %>%
              data.frame() %>%
              mutate(roi = "mentalizing",
                     variable = "social relevance"))

# participant-level predictions; the final filter keeps each participant
# only under their own site (NOTE(review): assumes pIDs containing "A"
# belong to the Netherlands sample -- confirm against the raw IDs)
predicted_sub_h1m = ggeffects::ggpredict(mod_h1am, terms = c("self_referential [vals]", "site", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(roi = "self-referential",
         variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h1bm, c("mentalizing [vals]", "site", "pID"), type = "random") %>%
              data.frame() %>%
              mutate(roi = "mentalizing",
                     variable = "social relevance")) %>%
  filter((group == "Netherlands" & grepl("A", facet)) | (group == "USA" & !grepl("A", facet)))

# thin lines = participants, thick lines = group effects, ribbon = 95% CI
predicted_h1m %>%
  ggplot(aes(x, predicted, color = group, fill = group)) +
  stat_smooth(data = predicted_sub_h1m, aes(group = interaction(group, facet)),
              geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .3, color = NA) +
  geom_line(size = 2) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_sample) +
  scale_fill_manual(name = "", values = palette_sample) +
  labs(x = "\nROI activity (SD)", y = "predicted rating\n") +
  plot_aes +
  theme(legend.position = "top",
        legend.key.width=unit(2,"cm"))

H2

Are the effects of the experimental manipulations on relevance moderated by cultural context?

self-relevance

These data are not consistent with moderation by cultural context.

# H2a moderation: condition x site effects on self-relevance ratings
# (random intercepts per participant only)
mod_h2am = lmer(
  self_relevance ~ cond * site + (1 | pID),
  data = merged_wide,
  control = lmerControl(optimizer = "bobyqa")
)

model table

# build the tidy fixed-effects table, then render it as styled HTML
table_h2am = table_model(mod_h2am, print = FALSE)

kableExtra::kable_styling(kable(table_h2am))
term b [95% CI] df t p
intercept 2.58 [2.46, 2.70] 121.34 42.05 < .001
other 0.04 [-0.05, 0.12] 5925.43 0.83 .409
self 0.04 [-0.05, 0.13] 5925.22 0.89 .372
sample (USA) -0.05 [-0.22, 0.12] 121.28 -0.58 .560
other x sample (USA) -0.06 [-0.18, 0.06] 5925.30 -0.94 .347
self x sample (USA) -0.01 [-0.13, 0.11] 5925.33 -0.20 .843

simple slopes

simple_slopes(mod_h2am, "cond", "site", continuous = FALSE)
contrast site b [95% CI]
other - control Netherlands 0.04 [-0.05, 0.12]
other - control USA -0.02 [-0.10, 0.06]
self - control Netherlands 0.04 [-0.05, 0.13]
self - control USA 0.03 [-0.05, 0.11]

summary

summary(mod_h2am)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_relevance ~ cond * site + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16782.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.4248 -0.7129  0.1645  0.6768  2.3191 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1116   0.3340  
##  Residual             0.9207   0.9595  
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                     Estimate Std. Error         df t value            Pr(>|t|)
## (Intercept)          2.58073    0.06137  121.33562  42.049 <0.0000000000000002
## condother            0.03648    0.04422 5925.43253   0.825               0.409
## condself             0.03942    0.04417 5925.21630   0.892               0.372
## siteUSA             -0.04932    0.08434  121.27906  -0.585               0.560
## condother:siteUSA   -0.05705    0.06073 5925.30374  -0.940               0.347
## condself:siteUSA    -0.01204    0.06073 5925.33153  -0.198               0.843
##                      
## (Intercept)       ***
## condother            
## condself             
## siteUSA              
## condother:siteUSA    
## condself:siteUSA     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf sitUSA cndt:USA
## condother   -0.360                              
## condself    -0.361  0.501                       
## siteUSA     -0.728  0.262  0.262                
## cndthr:sUSA  0.262 -0.728 -0.364 -0.360         
## cndslf:sUSA  0.262 -0.364 -0.727 -0.360  0.500

social relevance

These data are not consistent with moderation by cultural context.

# H2b moderation: condition x site effects on social relevance ratings
# (random intercepts per participant only)
mod_h2bm = lmer(
  social_relevance ~ cond * site + (1 | pID),
  data = merged_wide,
  control = lmerControl(optimizer = "bobyqa")
)

model table

# build the tidy fixed-effects table, then render it as styled HTML
table_h2bm = table_model(mod_h2bm, print = FALSE)

kableExtra::kable_styling(kable(table_h2bm))
term b [95% CI] df t p
intercept 2.73 [2.60, 2.86] 111.11 42.58 < .001
other 0.02 [-0.06, 0.10] 5925.36 0.39 .694
self 0.00 [-0.08, 0.08] 5925.20 0.00 1.000
sample (USA) -0.16 [-0.33, 0.02] 111.07 -1.81 .074
other x sample (USA) 0.06 [-0.05, 0.17] 5925.26 0.99 .321
self x sample (USA) 0.09 [-0.02, 0.20] 5925.28 1.52 .128

simple slopes

simple_slopes(mod_h2bm, "cond", "site", continuous = FALSE)
contrast site b [95% CI]
other - control Netherlands 0.02 [-0.06, 0.10]
other - control USA 0.07 [-0.00, 0.15]
self - control Netherlands 0.00 [-0.08, 0.08]
self - control USA 0.09 [0.01, 0.16]

summary

summary(mod_h2bm)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: social_relevance ~ cond * site + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 15858.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7612 -0.7255  0.1759  0.6447  2.7087 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1309   0.3618  
##  Residual             0.7863   0.8867  
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                        Estimate    Std. Error            df t value
## (Intercept)          2.72870427    0.06408668  111.10557585  42.578
## condother            0.01605094    0.04086469 5925.36013519   0.393
## condself             0.00002353    0.04081853 5925.19692706   0.001
## siteUSA             -0.15897492    0.08807085  111.06536204  -1.805
## condother:siteUSA    0.05565863    0.05612022 5925.26319432   0.992
## condself:siteUSA     0.08555097    0.05612675 5925.28386303   1.524
##                              Pr(>|t|)    
## (Intercept)       <0.0000000000000002 ***
## condother                      0.6945    
## condself                       0.9995    
## siteUSA                        0.0738 .  
## condother:siteUSA              0.3213    
## condself:siteUSA               0.1275    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf sitUSA cndt:USA
## condother   -0.319                              
## condself    -0.319  0.501                       
## siteUSA     -0.728  0.232  0.232                
## cndthr:sUSA  0.232 -0.728 -0.364 -0.319         
## cndslf:sUSA  0.232 -0.364 -0.727 -0.319  0.500

combined plot

# group-level predicted ratings per condition and site, both models stacked
predicted_h2m = ggeffects::ggpredict(mod_h2am, c("cond", "site")) %>%
  data.frame() %>%
  mutate(model = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h2bm, c("cond", "site")) %>%
              data.frame() %>%
              mutate(model = "social relevance")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

# participant-level predictions, restricted to each participant's own site
# (NOTE(review): assumes pIDs containing "A" are the Dutch sample -- confirm)
predicted_sub_h2m = ggeffects::ggpredict(mod_h2am, terms = c("cond", "site", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(model = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h2bm, c("cond", "site", "pID"), type = "random") %>%
              data.frame() %>%
              mutate(model = "social relevance")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other"))) %>%
  filter((group == "Netherlands" & grepl("A", facet)) | (group == "USA" & !grepl("A", facet)))

# thin lines = participant means, thick lines = group means,
# pointranges = 95% CIs; one panel per outcome
predicted_h2m %>%
  ggplot(aes(x = x, y = predicted, color = group)) +
  stat_summary(data = predicted_sub_h2m, aes(group = interaction(group, facet)), fun = "mean", geom = "line", size = .1, alpha = .5) +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  size = .75) +
  facet_grid(~model) +
  scale_color_manual(name = "", values = palette_sample) +
  labs(x = "", y = "predicted rating\n") +
  plot_aes +
  theme(legend.position = c(.85, .15))

H3

Are the relationships between self and social relevance and sharing intentions moderated by cultural context?

These data are not consistent with moderation by cultural context.

# H3 moderation: does site moderate the relevance -> sharing intention
# associations? Random intercepts plus both relevance slopes per pID.
mod_h3m = lmer(
  value ~ self_relevance_z * site + social_relevance_z * site +
    (1 + self_relevance_z + social_relevance_z | pID),
  data = merged_wide,
  control = lmerControl(optimizer = "bobyqa")
)

plot

# group-level predictions for each relevance predictor by site, stacked long
predicted_h3m = ggeffects::ggpredict(mod_h3m, c("self_relevance_z", "site")) %>%
  data.frame() %>%
  mutate(variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h3m, c("social_relevance_z", "site")) %>%
              data.frame() %>%
              mutate(variable = "social relevance"))

# participant-level predictions, restricted to each participant's own site
# (NOTE(review): assumes pIDs containing "A" are the Dutch sample -- confirm)
predicted_sub_h3m = ggeffects::ggpredict(mod_h3m, terms = c("self_relevance_z", "site", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(variable = "self-relevance") %>%
  bind_rows(ggeffects::ggpredict(mod_h3m, c("social_relevance_z", "site", "pID"), type = "random") %>%
              data.frame() %>%
              mutate(variable = "social relevance")) %>%
  filter((group == "Netherlands" & grepl("A", facet)) | (group == "USA" & !grepl("A", facet)))

# thin lines = participants, thick lines = group effects, ribbon = 95% CI
predicted_h3m %>%
  ggplot(aes(x, predicted, color = group, fill = group)) +
  stat_smooth(data = predicted_sub_h3m, aes(group = interaction(group, facet)),
              geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .3, color = NA) +
  geom_line(size = 2) +
  facet_grid(~variable) +
  scale_color_manual(name = "", values = palette_sample) +
  scale_fill_manual(name = "", values = palette_sample) +
  labs(x = "\nrating (SD)", y = "predicted sharing intention\n") +
  plot_aes +
  theme(legend.key.width=unit(2,"cm"))

model table

# build the tidy fixed-effects table, then render it as styled HTML
table_h3m = table_model(mod_h3m, print = FALSE)

kableExtra::kable_styling(kable(table_h3m))
term b [95% CI] df t p
intercept 2.58 [2.50, 2.66] 82.76 65.19 < .001
self-relevance 0.33 [0.27, 0.39] 89.64 11.01 < .001
sample (USA) 0.10 [-0.01, 0.21] 82.28 1.78 .078
social relevance 0.22 [0.15, 0.29] 88.54 5.95 < .001
self-relevance x sample (USA) -0.03 [-0.11, 0.04] 84.71 -0.87 .385
sample (USA) x social relevance 0.04 [-0.06, 0.14] 82.51 0.79 .429

simple slopes

self-relevance

simple_slopes(mod_h3m, "self_relevance_z", "site", continuous = TRUE)
site b [95% CI]
Netherlands 0.33 [0.27, 0.39]
USA 0.29 [0.24, 0.35]

social relevance

simple_slopes(mod_h3m, "social_relevance_z", "site", continuous = TRUE)
site b [95% CI]
Netherlands 0.22 [0.15, 0.29]
USA 0.26 [0.19, 0.32]

summary

summary(mod_h3m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_relevance_z * site + social_relevance_z * site +  
##     (1 + self_relevance_z + social_relevance_z | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 14914.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3723 -0.6976  0.0547  0.6917  3.0523 
## 
## Random effects:
##  Groups   Name               Variance Std.Dev. Corr       
##  pID      (Intercept)        0.05116  0.2262              
##           self_relevance_z   0.01209  0.1099   -0.38      
##           social_relevance_z 0.02958  0.1720    0.20 -0.55
##  Residual                    0.68672  0.8287              
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##                            Estimate Std. Error       df t value
## (Intercept)                 2.58183    0.03960 82.75581  65.195
## self_relevance_z            0.32807    0.02979 89.64063  11.013
## siteUSA                     0.09691    0.05435 82.28351   1.783
## social_relevance_z          0.22002    0.03698 88.54233   5.949
## self_relevance_z:siteUSA   -0.03484    0.03989 84.70853  -0.874
## siteUSA:social_relevance_z  0.03956    0.04977 82.51259   0.795
##                                        Pr(>|t|)    
## (Intercept)                < 0.0000000000000002 ***
## self_relevance_z           < 0.0000000000000002 ***
## siteUSA                                  0.0782 .  
## social_relevance_z                 0.0000000528 ***
## self_relevance_z:siteUSA                 0.3849    
## siteUSA:social_relevance_z               0.4289    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_r_ sitUSA scl_r_ s__:US
## slf_rlvnc_z -0.200                            
## siteUSA     -0.729  0.146                     
## scl_rlvnc_z  0.112 -0.590 -0.082              
## slf_rl_:USA  0.149 -0.747 -0.210  0.441       
## stUSA:scl__ -0.083  0.439  0.129 -0.743 -0.593

H4

Are the effects of the experimental manipulations on ROI activity moderated by cultural context?

self-referential ROI

There is a main effect of site, such that the American cohort has greater activity in the self-referential ROI compared to the Dutch cohort.

These data are not consistent with moderation by cultural context.

# H4a moderation: condition x site effects on self-referential ROI
# activity, with random condition effects per participant
mod_h4am = lmer(
  self_referential ~ cond * site + (1 + cond | pID),
  data = merged_wide,
  control = lmerControl(optimizer = "bobyqa")
)

model table

# build the tidy fixed-effects table, then render it as styled HTML
table_h4am = table_model(mod_h4am, print = FALSE)

kableExtra::kable_styling(kable(table_h4am))
term b [95% CI] df t p
intercept -0.14 [-0.29, 0.01] 83.01 -1.87 .064
other 0.10 [-0.01, 0.22] 82.72 1.81 .074
self 0.08 [-0.05, 0.20] 82.60 1.23 .222
sample (USA) 0.43 [0.22, 0.63] 82.99 4.08 < .001
other x sample (USA) -0.03 [-0.19, 0.12] 82.55 -0.44 .663
self x sample (USA) 0.02 [-0.15, 0.19] 82.68 0.24 .814

simple slopes

simple_slopes(mod_h4am, "cond", "site", continuous = FALSE)
contrast site b [95% CI]
other - control Netherlands 0.10 [-0.01, 0.22]
other - control USA 0.07 [-0.04, 0.17]
self - control Netherlands 0.08 [-0.05, 0.20]
self - control USA 0.10 [-0.02, 0.21]

summary

summary(mod_h4am)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: self_referential ~ cond * site + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17277.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.7968 -0.6584  0.0043  0.6447  3.6151 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.18992  0.4358              
##           condother   0.04730  0.2175   -0.17      
##           condself    0.07548  0.2747   -0.10  0.59
##  Residual             0.97964  0.9898              
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                   Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)       -0.14260    0.07608 83.01345  -1.874 0.064421 .  
## condother          0.10338    0.05713 82.72167   1.810 0.073998 .  
## condself           0.07752    0.06296 82.59520   1.231 0.221714    
## siteUSA            0.42645    0.10456 82.98789   4.079 0.000104 ***
## condother:siteUSA -0.03429    0.07848 82.55150  -0.437 0.663349    
## condself:siteUSA   0.02041    0.08655 82.67692   0.236 0.814160    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf sitUSA cndt:USA
## condother   -0.331                              
## condself    -0.282  0.536                       
## siteUSA     -0.728  0.241  0.205                
## cndthr:sUSA  0.241 -0.728 -0.390 -0.331         
## cndslf:sUSA  0.205 -0.390 -0.727 -0.281  0.536

mentalizing ROI

There is a main effect of site, such that the American cohort has greater activity in the mentalizing ROI compared to the Dutch cohort.

These data are not consistent with moderation by cultural context.

# H4b moderation: condition x site effects on mentalizing ROI activity,
# with random condition effects per participant
mod_h4bm = lmer(
  mentalizing ~ cond * site + (1 + cond | pID),
  data = merged_wide,
  control = lmerControl(optimizer = "bobyqa")
)

model table

# build the tidy fixed-effects table, then render it as styled HTML
table_h4bm = table_model(mod_h4bm, print = FALSE)

kableExtra::kable_styling(kable(table_h4bm))
term b [95% CI] df t p
intercept 0.12 [-0.03, 0.27] 83.11 1.66 .102
other 0.11 [0.00, 0.22] 82.55 2.01 .047
self 0.07 [-0.06, 0.19] 82.65 1.10 .276
sample (USA) 0.38 [0.18, 0.59] 83.09 3.73 < .001
other x sample (USA) -0.10 [-0.25, 0.05] 82.37 -1.27 .208
self x sample (USA) 0.01 [-0.16, 0.18] 82.73 0.11 .915

simple slopes

simple_slopes(mod_h4bm, "cond", "site", continuous = FALSE)
contrast site b [95% CI]
other - control Netherlands 0.11 [0.00, 0.22]
other - control USA 0.01 [-0.09, 0.12]
self - control Netherlands 0.07 [-0.05, 0.19]
self - control USA 0.08 [-0.04, 0.19]

summary

summary(mod_h4bm)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: mentalizing ~ cond * site + (1 + cond | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 17283.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6509 -0.6550  0.0194  0.6722  3.3344 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  pID      (Intercept) 0.18415  0.4291              
##           condother   0.03791  0.1947   -0.10      
##           condself    0.07119  0.2668   -0.07  0.63
##  Residual             0.98227  0.9911              
## Number of obs: 6014, groups:  pID, 85
## 
## Fixed effects:
##                   Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)        0.12439    0.07515 83.11419   1.655 0.101642    
## condother          0.11088    0.05509 82.55134   2.013 0.047401 *  
## condself           0.06819    0.06215 82.64788   1.097 0.275746    
## siteUSA            0.38494    0.10327 83.08754   3.727 0.000352 ***
## condother:siteUSA -0.09608    0.07568 82.36613  -1.270 0.207806    
## condself:siteUSA   0.00910    0.08544 82.73055   0.107 0.915436    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf sitUSA cndt:USA
## condother   -0.305                              
## condself    -0.264  0.544                       
## siteUSA     -0.728  0.222  0.192                
## cndthr:sUSA  0.222 -0.728 -0.396 -0.305         
## cndslf:sUSA  0.192 -0.396 -0.727 -0.264  0.544

combined plot

# group-level predicted ROI activity per condition and site, both ROIs stacked
predicted_h4m = ggeffects::ggpredict(mod_h4am, c("cond", "site")) %>%
  data.frame() %>%
  mutate(atlas = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h4bm, c("cond", "site")) %>%
              data.frame() %>%
              mutate(atlas = "mentalizing")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# participant-level predictions, restricted to each participant's own site
# (NOTE(review): assumes pIDs containing "A" are the Dutch sample -- confirm)
predicted_sub_h4m = ggeffects::ggpredict(mod_h4am, terms = c("cond", "site", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(atlas = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h4bm, c("cond", "site", "pID"), type = "random") %>%
              data.frame() %>%
              mutate(atlas = "mentalizing")) %>%
  mutate(x = factor(x, levels = c("self", "control", "other")),
         atlas = factor(atlas, levels = c("self-referential", "mentalizing"))) %>%
  filter((group == "Netherlands" & grepl("A", facet)) | (group == "USA" & !grepl("A", facet)))

# thin lines = participants, thick lines = group means, pointranges = 95% CIs
predicted_h4m %>%
  ggplot(aes(x = x, y = predicted, color = group)) +
  stat_summary(data = predicted_sub_h4m, aes(group = interaction(group, facet)), fun = "mean", geom = "line", size = .1) +
  stat_summary(aes(group = group), fun = "mean", geom = "line", size = 1, position = position_dodge(.1)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  size = .75, position = position_dodge(.1)) +
  facet_grid(~atlas) +
  scale_color_manual(name = "", values = palette_sample) +
  labs(x = "", y = "predicted ROI activity (SD)\n") +
  plot_aes +
  theme(legend.position = c(.18, .95))

H5

Are the effects of the experimental manipulations on sharing intentions moderated by cultural context?

These data are not consistent with moderation by cultural context.

# H5 moderation: condition x site effects on sharing intentions
# (random intercepts per participant only)
mod_h5m = lmer(
  value ~ cond * site + (1 | pID),
  data = merged_wide,
  control = lmerControl(optimizer = "bobyqa")
)

plot

predicted_h5m = ggeffects::ggpredict(mod_h5m, c("cond", "site")) %>%
  data.frame() %>%
  mutate(x = factor(x, levels = c("self", "control", "other")))

predicted_sub_h5m = ggeffects::ggpredict(mod_h5m, terms = c("cond", "site", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(x = factor(x, levels = c("self", "control", "other"))) %>%
    filter((group == "Netherlands" & grepl("A", facet)) | (group == "USA" & !grepl("A", facet)))
  
# Group-level predicted sharing intentions by condition and sample, with thin
# per-participant lines drawn underneath the group summaries.
ggplot(predicted_h5m, aes(x = x, y = predicted, color = group)) +
  # one thin line per participant (subject-level predictions)
  stat_summary(data = predicted_sub_h5m,
               aes(group = interaction(group, facet)),
               fun = "mean", geom = "line", size = .1) +
  # one bold line per sample
  stat_summary(aes(group = group), fun = "mean", geom = "line",
               size = 1, position = position_dodge(.1)) +
  # point estimates with confidence intervals
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high, group = group),
                  position = position_dodge(.1), size = .75) +
  labs(x = "", y = "predicted sharing intention\n") +
  scale_color_manual(name = "", values = palette_sample) +
  plot_aes +
  theme(legend.position = c(.85, .15))

model table

# Fixed-effect table for the H5 model; table_model() returns the tidy data
# frame (print = FALSE), which is then rendered as a styled HTML table.
table_h5m = table_model(mod_h5m, print = FALSE)

kableExtra::kable_styling(kable(table_h5m))
term b [95% CI] df t p
intercept 2.61 [2.49, 2.73] 124.80 43.00 < .001
other -0.01 [-0.10, 0.08] 5846.72 -0.26 .792
self -0.05 [-0.14, 0.04] 5846.54 -1.07 .283
sample (USA) 0.06 [-0.11, 0.22] 124.32 0.70 .483
other x sample (USA) -0.04 [-0.16, 0.08] 5846.55 -0.63 .528
self x sample (USA) 0.01 [-0.11, 0.13] 5846.55 0.11 .909

simple slopes

# Condition effects within each site; continuous = FALSE presumably returns
# factor-level contrasts rather than trends — see the simple_slopes() helper
simple_slopes(mod_h5m, "cond", "site", continuous = FALSE)
contrast site b [95% CI]
other - control Netherlands -0.01 [-0.10, 0.08]
other - control USA -0.05 [-0.13, 0.03]
self - control Netherlands -0.05 [-0.14, 0.04]
self - control USA -0.04 [-0.12, 0.04]

summary

# Full lmerTest summary (REML fit, Satterthwaite-method t-tests)
summary(mod_h5m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ cond * site + (1 | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16682.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5651 -0.7038  0.1163  0.7265  2.0366 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  pID      (Intercept) 0.1071   0.3273  
##  Residual             0.9401   0.9696  
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##                      Estimate  Std. Error          df t value
## (Intercept)          2.614614    0.060805  124.802819  43.000
## condother           -0.011887    0.045106 5846.721712  -0.264
## condself            -0.048350    0.045042 5846.539744  -1.073
## siteUSA              0.058686    0.083487  124.324082   0.703
## condother:siteUSA   -0.038949    0.061781 5846.545043  -0.630
## condself:siteUSA     0.007039    0.061799 5846.553373   0.114
##                              Pr(>|t|)    
## (Intercept)       <0.0000000000000002 ***
## condother                       0.792    
## condself                        0.283    
## siteUSA                         0.483    
## condother:siteUSA               0.528    
## condself:siteUSA                0.909    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cndthr cndslf sitUSA cndt:USA
## condother   -0.371                              
## condself    -0.372  0.501                       
## siteUSA     -0.728  0.270  0.271                
## cndthr:sUSA  0.271 -0.730 -0.366 -0.370         
## cndslf:sUSA  0.271 -0.365 -0.729 -0.370  0.500

H6

Are the positive relationships between ROI activity and sharing intentions moderated by cultural context?

self-referential ROI

These data are not consistent with moderation by cultural context.

# H6a: sharing intention (value) predicted by self-referential ROI activity,
# site, and their interaction, with random intercepts and random ROI-activity
# slopes per participant.
mod_h6am = lmer(value ~ self_referential * site + (1 + self_referential | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Fixed-effect table for the H6a model, rendered as a styled HTML table.
table_h6am = table_model(mod_h6am, print = FALSE)

kableExtra::kable_styling(kable(table_h6am))
term b [95% CI] df t p
intercept 2.60 [2.49, 2.71] 82.89 46.74 < .001
self-referential 0.06 [0.02, 0.10] 82.76 3.10 .003
sample (USA) 0.01 [-0.15, 0.16] 83.88 0.09 .928
self-referential x sample (USA) 0.04 [-0.01, 0.09] 81.13 1.51 .135

simple slopes

# Slope of self-referential ROI activity on sharing intention within each site
# (continuous = TRUE uses emmeans::emtrends — see the simple_slopes() helper)
simple_slopes(mod_h6am, "self_referential", "site", continuous = TRUE)
site b [95% CI]
Netherlands 0.06 [0.02, 0.10]
USA 0.10 [0.07, 0.14]

summary

# Full lmerTest summary (REML fit, Satterthwaite-method t-tests)
summary(mod_h6am)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ self_referential * site + (1 + self_referential | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16631.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.6103 -0.7252  0.1129  0.7407  2.3052 
## 
## Random effects:
##  Groups   Name             Variance Std.Dev. Corr 
##  pID      (Intercept)      0.109994 0.33165       
##           self_referential 0.002208 0.04699  -0.25
##  Residual                  0.930349 0.96455       
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##                           Estimate Std. Error        df t value
## (Intercept)               2.599556   0.055618 82.886354  46.739
## self_referential          0.061185   0.019764 82.757891   3.096
## siteUSA                   0.006967   0.076680 83.881555   0.091
## self_referential:siteUSA  0.040843   0.027064 81.132534   1.509
##                                      Pr(>|t|)    
## (Intercept)              < 0.0000000000000002 ***
## self_referential                      0.00268 ** 
## siteUSA                               0.92782    
## self_referential:siteUSA              0.13514    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) slf_rf sitUSA
## self_rfrntl -0.063              
## siteUSA     -0.725  0.046       
## slf_rfr:USA  0.046 -0.730 -0.123

mentalizing ROI

These data are not consistent with moderation by cultural context.

# H6b: sharing intention (value) predicted by mentalizing ROI activity, site,
# and their interaction, with random intercepts and random ROI-activity slopes
# per participant.
mod_h6bm = lmer(value ~ mentalizing * site + (1 + mentalizing | pID),
              data = merged_wide,
              control = lmerControl(optimizer = "bobyqa"))

model table

# Fixed-effect table for the H6b model, rendered as a styled HTML table.
table_h6bm = table_model(mod_h6bm, print = FALSE)

kableExtra::kable_styling(kable(table_h6bm))
term b [95% CI] df t p
intercept 2.58 [2.47, 2.69] 82.49 46.45 < .001
mentalizing 0.06 [0.02, 0.10] 82.57 3.10 .003
sample (USA) 0.01 [-0.14, 0.17] 84.55 0.17 .865
mentalizing x sample (USA) 0.02 [-0.03, 0.08] 81.34 0.87 .389

simple slopes

# Slope of mentalizing ROI activity on sharing intention within each site
# (continuous = TRUE uses emmeans::emtrends — see the simple_slopes() helper)
simple_slopes(mod_h6bm, "mentalizing", "site", continuous = TRUE)
site b [95% CI]
Netherlands 0.06 [0.02, 0.10]
USA 0.08 [0.05, 0.12]

summary

# Full lmerTest summary (REML fit, Satterthwaite-method t-tests)
summary(mod_h6bm)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ mentalizing * site + (1 + mentalizing | pID)
##    Data: merged_wide
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 16643.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.5921 -0.7264  0.1166  0.7388  2.2269 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  pID      (Intercept) 0.109493 0.33090       
##           mentalizing 0.002132 0.04618  -0.12
##  Residual             0.932303 0.96556       
## Number of obs: 5935, groups:  pID, 85
## 
## Fixed effects:
##                     Estimate Std. Error       df t value             Pr(>|t|)
## (Intercept)          2.58314    0.05561 82.48970  46.453 < 0.0000000000000002
## mentalizing          0.06101    0.01971 82.57202   3.096              0.00268
## siteUSA              0.01312    0.07693 84.55322   0.171              0.86497
## mentalizing:siteUSA  0.02341    0.02701 81.33591   0.867              0.38866
##                        
## (Intercept)         ***
## mentalizing         ** 
## siteUSA                
## mentalizing:siteUSA    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) mntlzn sitUSA
## mentalizing -0.098              
## siteUSA     -0.723  0.071       
## mntlzng:USA  0.071 -0.729 -0.147

combined plot

# Grid of ROI-activity values (SD units, per the plot's x-axis label) passed to
# ggpredict via the "term [vals]" syntax in the calls below
vals = seq(-4.5,4.5,.1)

# Fixed-effect predictions across the ROI-activity grid for both H6 models,
# stacked and labeled by atlas for the combined faceted plot.
predicted_h6m = bind_rows(
  ggeffects::ggpredict(mod_h6am, c("self_referential [vals]", "site")) %>%
    data.frame() %>%
    mutate(atlas = "self-referential"),
  ggeffects::ggpredict(mod_h6bm, c("mentalizing [vals]", "site")) %>%
    data.frame() %>%
    mutate(atlas = "mentalizing")
) %>%
  mutate(atlas = factor(atlas, levels = c("self-referential", "mentalizing")))

# Subject-level (random-effect) predictions for both H6 models, stacked and
# labeled by ROI atlas.
# FIX: the label column was named `roi`, but the combined plot below facets on
# `atlas` (matching predicted_h6m and the parallel H4 block). Because ggplot2
# replicates a layer into every panel when its data lacks the faceting
# variable, each participant's line was drawn under BOTH facets. Naming the
# column `atlas` restricts each subject line to its own ROI panel.
predicted_sub_h6m = ggeffects::ggpredict(mod_h6am, terms = c("self_referential [vals]", "site", "pID"), type = "random") %>%
  data.frame() %>%
  mutate(atlas = "self-referential") %>%
  bind_rows(ggeffects::ggpredict(mod_h6bm, c("mentalizing [vals]", "site", "pID"), type = "random") %>%
              data.frame() %>%
              mutate(atlas = "mentalizing")) %>%
  mutate(atlas = factor(atlas, levels = c("self-referential", "mentalizing"))) %>%
    # keep each participant only in their own site; presumably Netherlands
    # pIDs contain "A" — TODO(review): confirm pID coding
    filter((group == "Netherlands" & grepl("A", facet)) | (group == "USA" & !grepl("A", facet)))

# Combined H6 plot: group-level prediction lines with confidence ribbons over
# thin per-participant linear fits, faceted by ROI atlas.
predicted_h6m %>%
  ggplot(aes(x = x, y = predicted, color = group, fill = group)) +
  # thin linear fit per participant, from the subject-level predictions
  stat_smooth(data = predicted_sub_h6m, aes(group = interaction(group, facet)),
              geom ='line', method = "lm", alpha = .1, size = 1, se = FALSE) +
  # group-level confidence ribbon and prediction line
  geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .3, color = NA) +
  geom_line(size = 2) +
  facet_grid(~atlas) +
  scale_y_continuous(limits = c(1.5, 4), breaks = c(2:4)) +
  scale_color_manual(name = "", values = palette_sample) +
  scale_fill_manual(name = "", values = palette_sample) +
  labs(y = "predicted sharing intention\n", x = "\nROI activity (SD)") +
  plot_aes +
  theme(legend.position = "top")



combined table

# Stack every hypothesis-test table into one long table, tagged by hypothesis
# label in a leading DV column, and render it as a styled HTML table.
list("H1a: Self-relevance" = table_h1am,
     "H1b: Social relevance" = table_h1bm,
     "H2a: Self-relevance" = table_h2am,
     "H2b: Social relevance" = table_h2bm,
     "H3a-b: Sharing intention" = table_h3m,
     "H4a: Self-referential ROI" = table_h4am,
     "H4b: Mentalizing ROI" = table_h4bm,
     "H5: Sharing intention" = table_h5m,
     "H6a: Sharing intention" = table_h6am,
     "H6b: Sharing intention" = table_h6bm) %>%
  bind_rows(.id = "DV") %>%
  kable() %>%
  kable_styling()
DV term b [95% CI] df t p
H1a: Self-relevance intercept 2.61 [2.50, 2.72] 82.64 46.36 < .001
H1a: Self-relevance self-referential 0.04 [0.00, 0.08] 84.47 2.23 .028
H1a: Self-relevance sample (USA) -0.09 [-0.25, 0.06] 83.66 -1.21 .229
H1a: Self-relevance self-referential x sample (USA) 0.01 [-0.04, 0.06] 82.89 0.42 .673
H1b: Social relevance intercept 2.73 [2.61, 2.84] 81.87 45.55 < .001
H1b: Social relevance mentalizing 0.04 [0.01, 0.08] 83.25 2.42 .018
H1b: Social relevance sample (USA) -0.13 [-0.30, 0.03] 83.42 -1.61 .111
H1b: Social relevance mentalizing x sample (USA) 0.01 [-0.04, 0.06] 82.63 0.29 .772
H2a: Self-relevance intercept 2.58 [2.46, 2.70] 121.34 42.05 < .001
H2a: Self-relevance other 0.04 [-0.05, 0.12] 5925.43 0.83 .409
H2a: Self-relevance self 0.04 [-0.05, 0.13] 5925.22 0.89 .372
H2a: Self-relevance sample (USA) -0.05 [-0.22, 0.12] 121.28 -0.58 .560
H2a: Self-relevance other x sample (USA) -0.06 [-0.18, 0.06] 5925.30 -0.94 .347
H2a: Self-relevance self x sample (USA) -0.01 [-0.13, 0.11] 5925.33 -0.20 .843
H2b: Social relevance intercept 2.73 [2.60, 2.86] 111.11 42.58 < .001
H2b: Social relevance other 0.02 [-0.06, 0.10] 5925.36 0.39 .694
H2b: Social relevance self 0.00 [-0.08, 0.08] 5925.20 0.00 1.000
H2b: Social relevance sample (USA) -0.16 [-0.33, 0.02] 111.07 -1.81 .074
H2b: Social relevance other x sample (USA) 0.06 [-0.05, 0.17] 5925.26 0.99 .321
H2b: Social relevance self x sample (USA) 0.09 [-0.02, 0.20] 5925.28 1.52 .128
H3a-b: Sharing intention intercept 2.58 [2.50, 2.66] 82.76 65.19 < .001
H3a-b: Sharing intention self-relevance 0.33 [0.27, 0.39] 89.64 11.01 < .001
H3a-b: Sharing intention sample (USA) 0.10 [-0.01, 0.21] 82.28 1.78 .078
H3a-b: Sharing intention social relevance 0.22 [0.15, 0.29] 88.54 5.95 < .001
H3a-b: Sharing intention self-relevance x sample (USA) -0.03 [-0.11, 0.04] 84.71 -0.87 .385
H3a-b: Sharing intention sample (USA) x social relevance 0.04 [-0.06, 0.14] 82.51 0.79 .429
H4a: Self-referential ROI intercept -0.14 [-0.29, 0.01] 83.01 -1.87 .064
H4a: Self-referential ROI other 0.10 [-0.01, 0.22] 82.72 1.81 .074
H4a: Self-referential ROI self 0.08 [-0.05, 0.20] 82.60 1.23 .222
H4a: Self-referential ROI sample (USA) 0.43 [0.22, 0.63] 82.99 4.08 < .001
H4a: Self-referential ROI other x sample (USA) -0.03 [-0.19, 0.12] 82.55 -0.44 .663
H4a: Self-referential ROI self x sample (USA) 0.02 [-0.15, 0.19] 82.68 0.24 .814
H4b: Mentalizing ROI intercept 0.12 [-0.03, 0.27] 83.11 1.66 .102
H4b: Mentalizing ROI other 0.11 [0.00, 0.22] 82.55 2.01 .047
H4b: Mentalizing ROI self 0.07 [-0.06, 0.19] 82.65 1.10 .276
H4b: Mentalizing ROI sample (USA) 0.38 [0.18, 0.59] 83.09 3.73 < .001
H4b: Mentalizing ROI other x sample (USA) -0.10 [-0.25, 0.05] 82.37 -1.27 .208
H4b: Mentalizing ROI self x sample (USA) 0.01 [-0.16, 0.18] 82.73 0.11 .915
H5: Sharing intention intercept 2.61 [2.49, 2.73] 124.80 43.00 < .001
H5: Sharing intention other -0.01 [-0.10, 0.08] 5846.72 -0.26 .792
H5: Sharing intention self -0.05 [-0.14, 0.04] 5846.54 -1.07 .283
H5: Sharing intention sample (USA) 0.06 [-0.11, 0.22] 124.32 0.70 .483
H5: Sharing intention other x sample (USA) -0.04 [-0.16, 0.08] 5846.55 -0.63 .528
H5: Sharing intention self x sample (USA) 0.01 [-0.11, 0.13] 5846.55 0.11 .909
H6a: Sharing intention intercept 2.60 [2.49, 2.71] 82.89 46.74 < .001
H6a: Sharing intention self-referential 0.06 [0.02, 0.10] 82.76 3.10 .003
H6a: Sharing intention sample (USA) 0.01 [-0.15, 0.16] 83.88 0.09 .928
H6a: Sharing intention self-referential x sample (USA) 0.04 [-0.01, 0.09] 81.13 1.51 .135
H6b: Sharing intention intercept 2.58 [2.47, 2.69] 82.49 46.45 < .001
H6b: Sharing intention mentalizing 0.06 [0.02, 0.10] 82.57 3.10 .003
H6b: Sharing intention sample (USA) 0.01 [-0.14, 0.17] 84.55 0.17 .865
H6b: Sharing intention mentalizing x sample (USA) 0.02 [-0.03, 0.08] 81.34 0.87 .389

cite packages

# Emit citations for all packages loaded in this session
report::cite_packages()
##   - Angelo Canty and Brian Ripley (2021). boot: Bootstrap R (S-Plus) Functions. R package version 1.3-28.
##   - Douglas Bates, Martin Maechler and Mikael Jagan (2023). Matrix: Sparse and Dense Matrix Classes and Methods. R package version 1.5-4. https://CRAN.R-project.org/package=Matrix
##   - Douglas Bates, Martin Maechler, Ben Bolker, Steve Walker (2015). Fitting Linear Mixed-Effects Models Using lme4. Journal of Statistical Software, 67(1), 1-48. doi:10.18637/jss.v067.i01.
##   - H. Wickham. ggplot2: Elegant Graphics for Data Analysis. Springer-Verlag New York, 2016.
##   - Hadley Wickham (2021). forcats: Tools for Working with Categorical Variables (Factors). R package version 0.5.1. https://CRAN.R-project.org/package=forcats
##   - Hadley Wickham (2022). stringr: Simple, Consistent Wrappers for Common String Operations. R package version 1.5.0. https://CRAN.R-project.org/package=stringr
##   - Hadley Wickham and Maximilian Girlich (2022). tidyr: Tidy Messy Data. R package version 1.2.0. https://CRAN.R-project.org/package=tidyr
##   - Hadley Wickham, Jennifer Bryan and Malcolm Barrett (2021). usethis: Automate Package and Project Setup. R package version 2.1.5. https://CRAN.R-project.org/package=usethis
##   - Hadley Wickham, Jim Hester and Jennifer Bryan (2022). readr: Read Rectangular Text Data. R package version 2.1.2. https://CRAN.R-project.org/package=readr
##   - Hadley Wickham, Jim Hester, Winston Chang and Jennifer Bryan (2021). devtools: Tools to Make Developing R Packages Easier. R package version 2.4.3. https://CRAN.R-project.org/package=devtools
##   - Hadley Wickham, Romain François, Lionel Henry and Kirill Müller (2022). dplyr: A Grammar of Data Manipulation. R package version 1.0.9. https://CRAN.R-project.org/package=dplyr
##   - Hao Zhu (2021). kableExtra: Construct Complex Table with 'kable' and Pipe Syntax. R package version 1.3.4. https://CRAN.R-project.org/package=kableExtra
##   - Jim Hester, Hadley Wickham and Gábor Csárdi (2021). fs: Cross-Platform File System Operations Based on 'libuv'. R package version 1.5.2. https://CRAN.R-project.org/package=fs
##   - Kirill Müller and Hadley Wickham (2022). tibble: Simple Data Frames. R package version 3.1.8. https://CRAN.R-project.org/package=tibble
##   - Kuznetsova A, Brockhoff PB, Christensen RHB (2017). "lmerTest Package:Tests in Linear Mixed Effects Models." _Journal of StatisticalSoftware_, *82*(13), 1-26. doi: 10.18637/jss.v082.i13 (URL:https://doi.org/10.18637/jss.v082.i13).
##   - Lionel Henry and Hadley Wickham (2020). purrr: Functional Programming Tools. R package version 0.3.4. https://CRAN.R-project.org/package=purrr
##   - Lüdecke D (2018). "ggeffects: Tidy Data Frames of Marginal Effects fromRegression Models." _Journal of Open Source Software_, *3*(26), 772.doi: 10.21105/joss.00772 (URL: https://doi.org/10.21105/joss.00772).
##   - R Core Team (2021). R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.
##   - Rinker, T. W. & Kurkiewicz, D. (2017). pacman: Package Management for R. version 0.5.0. Buffalo, New York. http://github.com/trinker/pacman
##   - Wickham et al., (2019). Welcome to the tidyverse. Journal of Open Source Software, 4(43), 1686, https://doi.org/10.21105/joss.01686
##   - Yihui Xie (2021). knitr: A General-Purpose Package for Dynamic Report Generation in R. R package version 1.37.